var runtime.debug

145 uses

	runtime (current package)
		arena.go#L816: 	if debug.malloc {
		cgocall.go#L431: 	if debug.dataindependenttiming == 1 && gp.m.isextra {
		cgocall.go#L452: 	if debug.dataindependenttiming == 1 && !ditAlreadySet {
		cgocall.go#L538: 	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
		cgocall.go#L789: 	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
		cgroup_linux.go#L52: 	if debug.containermaxprocs > 0 {
		chan.go#L822: 	async := debug.asynctimerchan.Load() != 0
		chan.go#L840: 		async := debug.asynctimerchan.Load() != 0
		malloc.go#L1032: 	if debug.malloc {
		malloc.go#L1104: 	if debug.malloc {
		malloc.go#L1639: 	if debug.sbrk != 0 {
		malloc.go#L1687: 	if debug.checkfinalizers != 0 && elemsize == 0 {
		mbitmap.go#L1324: 		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
		mbitmap.go#L1343: 		if debug.invalidptr != 0 {
		mcheckmark.go#L180: 		if debug.checkfinalizers > 1 {
		mcheckmark.go#L191: 		if debug.checkfinalizers > 1 {
		mcleanup.go#L94: 	if debug.sbrk != 0 {
		mcleanup.go#L124: 	if debug.checkfinalizers != 0 {
		mcleanup.go#L205: 	if debug.checkfinalizers != 0 {
		mem_linux.go#L51: 	if debug.madvdontneed != 0 && advise != madviseUnsupported {
		mem_linux.go#L80: 	if debug.harddecommit > 0 {
		mem_linux.go#L90: 	if debug.harddecommit > 0 {
		mem_linux.go#L187: 	if debug.disablethp != 0 {
		mfinal.go#L449: 	if debug.sbrk != 0 {
		mfinal.go#L485: 			if debug.checkfinalizers != 0 {
		mfinal.go#L541: 		if debug.checkfinalizers != 0 {
		mgc.go#L694: 	if debug.gcstoptheworld == 1 {
		mgc.go#L696: 	} else if debug.gcstoptheworld == 2 {
		mgc.go#L1072: 		if debug.gccheckmark > 0 {
		mgc.go#L1075: 		if debug.checkfinalizers > 0 {
		mgc.go#L1224: 	if debug.gctrace > 1 {
		mgc.go#L1236: 		if debug.gctrace > 1 {
		mgc.go#L1254: 	if debug.gctrace > 0 {
		mgc.go#L1297: 		if debug.gctrace > 1 {
		mgc.go#L1306: 	if debug.checkfinalizers > 0 {
		mgc.go#L1636: 	if debug.gccheckmark > 0 {
		mgc.go#L1659: 		if debug.gccheckmark > 0 {
		mgcmark.go#L1534: 	if debug.gctrace > 1 {
		mgcmark.go#L1672: 		if debug.checkfinalizers > 1 {
		mgcmark.go#L1676: 		if debug.gccheckmark > 0 && span.isFree(objIndex) {
		mgcpacer.go#L422: 	if debug.gcstoptheworld > 0 {
		mgcpacer.go#L458: 	if debug.gcpacertrace > 0 {
		mgcpacer.go#L676: 	if debug.gcpacertrace > 0 {
		mgcsweep.go#L176: 			if debug.gcpacertrace > 0 {
		mgcsweep.go#L427: 		if debug.scavtrace > 0 {
		mgcsweep.go#L617: 	if traceAllocFreeEnabled() || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
		mgcsweep.go#L631: 				if debug.clobberfree != 0 {
		mgcsweep.go#L829: 			if debug.efence > 0 {
		mheap.go#L1773: 	if debug.scavtrace > 0 {
		mheap.go#L2557: 	if debug.sbrk != 0 {
		mprof.go#L447: 	nstk := callers(5, mp.profStack[:debug.profstackdepth])
		mprof.go#L534: 	if debug.profstackdepth == 0 {
		mprof.go#L713: 	if debug.profstackdepth == 0 {
		mprof.go#L769: 	nstk := int(debug.profstackdepth)
		panic.go#L735: 		if debug.panicnil.Load() != 1 {
		panic.go#L1377: 		if debug.schedtrace > 0 || debug.scheddetail > 0 {
		preempt.go#L223: 			if preemptMSupported && debug.asyncpreemptoff == 0 && needAsync {
		proc.go#L197: 	if debug.inittrace != 0 {
		proc.go#L375: 		if debug.gctrace > 0 {
		proc.go#L1041: 	if debug.profstackdepth == 0 {
		proc.go#L1060: 	return make([]uintptr, 1+maxSkip+debug.profstackdepth)
		proc.go#L1065: func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
		proc.go#L1152: 	if debug.dontfreezetheworld > 0 {
		proc.go#L1937: 	if debug.dataindependenttiming == 1 {
		proc.go#L4159: 	if debug.dontfreezetheworld > 0 && freezing.Load() {
		proc.go#L5289: 	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
		proc.go#L5297: 	if n > debug.tracebackancestors {
		proc.go#L5298: 		n = debug.tracebackancestors
		proc.go#L6266: 		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
		proc.go#L6349: 		if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
		proc.go#L6373: 		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
		proc.go#L6375: 			schedtrace(debug.scheddetail > 0)
		proc.go#L6514: 	if preemptMSupported && debug.asyncpreemptoff == 0 {
		proc.go#L6692: 	if debug.updatemaxprocs == 0 {
		runtime1.go#L310: var debug struct {
		runtime1.go#L370: 	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
		runtime1.go#L371: 	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
		runtime1.go#L372: 	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
		runtime1.go#L373: 	{name: "cgocheck", value: &debug.cgocheck},
		runtime1.go#L374: 	{name: "clobberfree", value: &debug.clobberfree},
		runtime1.go#L375: 	{name: "containermaxprocs", value: &debug.containermaxprocs, def: 1},
		runtime1.go#L376: 	{name: "dataindependenttiming", value: &debug.dataindependenttiming},
		runtime1.go#L377: 	{name: "decoratemappings", value: &debug.decoratemappings, def: 1},
		runtime1.go#L378: 	{name: "disablethp", value: &debug.disablethp},
		runtime1.go#L379: 	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
		runtime1.go#L380: 	{name: "checkfinalizers", value: &debug.checkfinalizers},
		runtime1.go#L381: 	{name: "efence", value: &debug.efence},
		runtime1.go#L382: 	{name: "gccheckmark", value: &debug.gccheckmark},
		runtime1.go#L383: 	{name: "gcpacertrace", value: &debug.gcpacertrace},
		runtime1.go#L384: 	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
		runtime1.go#L385: 	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
		runtime1.go#L386: 	{name: "gctrace", value: &debug.gctrace},
		runtime1.go#L387: 	{name: "harddecommit", value: &debug.harddecommit},
		runtime1.go#L388: 	{name: "inittrace", value: &debug.inittrace},
		runtime1.go#L389: 	{name: "invalidptr", value: &debug.invalidptr},
		runtime1.go#L390: 	{name: "madvdontneed", value: &debug.madvdontneed},
		runtime1.go#L391: 	{name: "panicnil", atomic: &debug.panicnil},
		runtime1.go#L392: 	{name: "profstackdepth", value: &debug.profstackdepth, def: 128},
		runtime1.go#L393: 	{name: "sbrk", value: &debug.sbrk},
		runtime1.go#L394: 	{name: "scavtrace", value: &debug.scavtrace},
		runtime1.go#L395: 	{name: "scheddetail", value: &debug.scheddetail},
		runtime1.go#L396: 	{name: "schedtrace", value: &debug.schedtrace},
		runtime1.go#L397: 	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
		runtime1.go#L398: 	{name: "traceallocfree", atomic: &debug.traceallocfree},
		runtime1.go#L399: 	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
		runtime1.go#L400: 	{name: "tracebackancestors", value: &debug.tracebackancestors},
		runtime1.go#L401: 	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
		runtime1.go#L402: 	{name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1},
		runtime1.go#L407: 	debug.cgocheck = 1
		runtime1.go#L408: 	debug.invalidptr = 1
		runtime1.go#L409: 	debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
		runtime1.go#L419: 		debug.madvdontneed = 1
		runtime1.go#L421: 	debug.traceadvanceperiod = defaultTraceAdvancePeriod
		runtime1.go#L440: 	debug.malloc = (debug.inittrace | debug.sbrk | debug.checkfinalizers) != 0
		runtime1.go#L441: 	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)
		runtime1.go#L456: 	if debug.gccheckmark > 0 {
		runtime1.go#L457: 		debug.asyncpreemptoff = 1
		runtime1.go#L553: 	if debug.cgocheck > 1 {
		set_vma_name_linux.go#L23: 	if debug.decoratemappings == 0 || !setVMANameSupported() {
		signal_unix.go#L447: 		if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
		signal_unix.go#L685: 	if sig == sigPreempt && debug.asyncpreemptoff == 0 && !delayedSignal {
		signal_unix.go#L1391: 	if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
		stack.go#L359: 	if debug.efence != 0 || stackFromSystem != 0 {
		stack.go#L477: 	if debug.efence != 0 || stackFromSystem != 0 {
		stack.go#L478: 		if debug.efence != 0 || stackFaultOnFree != 0 {
		stack.go#L676: 			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
		stack.go#L1258: 	if debug.gcshrinkstackoff > 0 {
		stack.go#L1371: 	if debug.adaptivestackstart == 0 {
		synctest.go#L172: 	if debug.asynctimerchan.Load() != 0 {
		time.go#L498: 	async := debug.asynctimerchan.Load() != 0
		time.go#L579: 	async := debug.asynctimerchan.Load() != 0
		time.go#L1168: 	async := debug.asynctimerchan.Load() != 0
		trace.go#L260: 	if debug.traceallocfree.Load() != 0 {
		trace.go#L262: 		trace.debugMalloc = debug.malloc
		trace.go#L264: 		debug.malloc = true
		trace.go#L635: 			debug.malloc = trace.debugMalloc
		trace.go#L993: 			s.timer.sleep(int64(debug.traceadvanceperiod))
		tracestack.go#L43: 	if debug.traceCheckStackOwnership != 0 && gp != nil {
		tracestack.go#L257: 	return debug.tracefpunwindoff != 0 || (goarch.ArchFamily != goarch.AMD64 && goarch.ArchFamily != goarch.ARM64)
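
	The dbgvars table shown in the runtime1.go#L370-L402 entries above maps GODEBUG option names onto the fields of this struct, and parsedebugvars fills them in at startup. As a hedged, minimal illustration (not part of the runtime package; the program below is hypothetical), the sketch shows how one of the listed options, gctrace, is exercised from outside: running the program with GODEBUG=gctrace=1 makes the collector print a summary line per cycle, guarded by the debug.gctrace checks in mgc.go above.

	// Minimal sketch, assuming a standalone program run as:
	//
	//	GODEBUG=gctrace=1 go run main.go
	//
	// With gctrace=1 set, each collection prints a trace line to standard
	// error; the corresponding checks appear in the mgc.go entries above.
	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		// Churn the heap so the collector has work to report.
		for i := 0; i < 5; i++ {
			data := make([][]byte, 0, 1024)
			for j := 0; j < 1024; j++ {
				data = append(data, make([]byte, 64<<10)) // 64 KiB per allocation
			}
			runtime.GC() // force a cycle; with gctrace=1 this emits one trace line
			_ = data
		}
		fmt.Println("done")
	}

	Options wired through the atomic fields in the table (for example asynctimerchan, panicnil, traceallocfree) are read via Load calls in the entries above, while the plain value fields are only consulted after startup parsing; the example relies solely on the documented GODEBUG environment variable and makes no assumption about runtime internals beyond what the listing shows.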